add a fix for issue 1175...
Matt Mackall
r6875:0d714a48 default
@@ -0,0 +1,20 @@
#!/bin/sh
rm -rf a
hg init a
cd a
touch a
hg ci -Am0
hg mv a a1
hg ci -m1
hg co 0
hg mv a a2
hg up
hg ci -m2

touch a
hg ci -Am3
hg mv a b
hg ci -Am4 a
hg ci --debug --traceback -Am5 b
hg verify
hg export --git tip
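The crux of the script: "hg ci -Am4 a" commits only the removal of a, so b, which the dirstate remembers as a copy of a, is first committed in changeset 5, when a is no longer present in the parent manifest. A minimal sketch of that state with plain dicts (illustrative names, not Mercurial APIs):

# After "hg mv a b" and "hg ci -Am4 a": the parent manifest no longer
# contains the copy source, but the dirstate still records the copy.
nullid = "\0" * 20
parent_manifest = {}              # 'a' was committed as removed
dirstate_copies = {'b': 'a'}      # recorded by "hg mv a b"

source = dirstate_copies['b']
# pre-fix lookup: a missing source silently became nullid
assert parent_manifest.get(source, nullid) == nullid
# post-fix lookup: None signals "go search the ancestors"
assert parent_manifest.get(source) is None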
@@ -0,0 +1,24 @@
adding a
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
warning: detected divergent renames of a to:
 a2
 a1
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding a
b
 b: searching for copy revision for a
 b: copy a:b80de5d138758541c5f05265ad144ab9fa86d1db
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
4 files, 6 changesets, 4 total revisions
# HG changeset patch
# User test
# Date 0 0
# Node ID 755e75751bf67eb4378bca61987df035d90a7a06
# Parent 7399822c2e395fe7d57c2fcf4b310f6fb22f8c2d
5

diff --git a/b b/b
new file mode 100644
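The two " b:" debug lines are the new code path at work: commit 5 cannot find a in the parent manifest, falls back to searching the ancestors, and records b as a copy of a at the filelog node shown. That node should simply be the hash of an empty file with two null parents, which Mercurial computes as sha1(p1 + p2 + text); a sketch using only the standard library, with the expected value taken from the output above:

from hashlib import sha1

nullid = "\0" * 20
text = ""                      # "a" was created with touch, so it is empty
node = sha1(nullid + nullid + text).hexdigest()
# expected to match the copyrev in the output:
# b80de5d138758541c5f05265ad144ab9fa86d1db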
@@ -1,2136 +1,2154 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui
import os, revlog, time, util, extensions, hook, inspect

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

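__getattr__ only fires when normal attribute lookup fails, so changelog, manifest, and dirstate are built on first touch and then cached as ordinary instance attributes. A standalone sketch of the same pattern (toy class, not the Mercurial API):

class _Lazy(object):
    def __getattr__(self, name):
        # reached only on first access; afterwards the instance
        # attribute shadows this method entirely
        if name == 'changelog':
            self.changelog = object()  # stand-in for changelog.changelog(...)
            return self.changelog
        raise AttributeError(name)

r = _Lazy()
first = r.changelog          # built here, via __getattr__
assert r.changelog is first  # plain attribute lookup now, no rebuild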
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

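readtags() expects one "<40-digit hex node> <tag name>" pair per line, with later entries (and .hgtags copies from more tip-ward heads) overriding earlier ones. A simplified parser sketch that skips the head-merging and validation done above (standard library only):

from binascii import unhexlify   # stand-in for node.bin

def parse_hgtags(data):
    tags = {}
    for l in data.splitlines():
        if not l:
            continue
        s = l.split(" ", 1)
        if len(s) != 2:
            continue             # would warn "cannot parse entry"
        node, key = s
        tags[key.strip()] = unhexlify(node)
    return tags

tags = parse_hgtags("1e4e1b8f71e05681d422154f5421e385fec3454f v1.0\n")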
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

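branch.cache is a validation line ("<tip hex> <tip rev>") followed by one "<head hex> <branch name>" line per branch; _readbranchcache throws the whole file away if that first line no longer matches the changelog. A round-trip sketch of the format itself, using plain strings instead of repo objects:

def format_branchcache(tiphex, tiprev, branches):
    lines = ["%s %s" % (tiphex, tiprev)]          # validation line
    for label, nodehex in branches.items():
        lines.append("%s %s" % (nodehex, label))  # one head per branch
    return "\n".join(lines) + "\n"

def parse_branchcache(data):
    lines = data.split("\n")
    last, lrev = lines.pop(0).split(" ", 1)
    partial = {}
    for l in lines:
        if not l:
            continue
        node, label = l.split(" ", 1)
        partial[label.strip()] = node
    return partial, last, int(lrev)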
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

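A filter registered with adddatafilter() is chosen when an [encode] or [decode] hgrc command starts with its registered name, and _filter() invokes it as fn(data, params, ui=..., repo=..., filename=...); filters that take no keyword arguments get wrapped. A sketch of a conforming filter (hypothetical name and behavior, not a shipped Mercurial filter):

def upper_filter(data, params, **kwargs):
    # data: the file contents; params: the remainder of the hgrc
    # command after the registered name; kwargs: ui, repo, filename
    return data.upper()

# hypothetically registered via: repo.adddatafilter('upper:', upper_filter)
# and selected by an hgrc entry such as:
#   [encode]
#   **.txt = upper: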
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cf = self.dirstate.copied(fn)
        if cf and cf != fn:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

-            cr = manifest1.get(cf, nullid)
+            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid: # copied on remote side
                    if fp1 != nullid or cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

+            # find source in nearest ancestor if we've lost track
+            if not cr:
+                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
+                              (fn, cf))
+                p1 = self.dirstate.parents()[0]
+                rev = self.changelog.rev(p1)
+                seen = {-1:None}
+                visit = [rev]
+                while visit:
+                    for p in self.changelog.parentrevs(visit.pop(0)):
+                        if p not in seen:
+                            seen[p] = True
+                            visit.append(p)
+                            ctx = self.changectx(p)
+                            if cf in ctx:
+                                cr = ctx[cf].filenode()
+                                break
+
            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

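This is the actual fix for issue 1175. Before, a copy source missing from manifest1 silently yielded cr = nullid, so the new filelog revision recorded a copy to the null revision; now cr is None in that case and the added block walks the first parent's ancestors breadth-first until it finds a revision that still has the source. A standalone restatement of that walk, using the same repo APIs as the diff (it returns at the first hit, where the code above merely breaks the inner loop):

def find_copy_source(repo, cf):
    """Sketch: breadth-first walk of the first parent's ancestors,
    returning the copy source's filenode from the first revision
    that still contains it, else None."""
    seen = {-1: None}                 # never visit the null revision
    p1 = repo.dirstate.parents()[0]
    visit = [repo.changelog.rev(p1)]
    while visit:
        for p in repo.changelog.parentrevs(visit.pop(0)):
            if p not in seen:
                seen[p] = True
                visit.append(p)
                ctx = repo.changectx(p)
                if cf in ctx:
                    return ctx[cf].filenode()
    return None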
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (files or match != util.always)):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock

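commit() brackets the write with three hooks in a fixed order that is easy to miss among the bookkeeping above; a compressed restatement of just that sequence (a sketch with the keyword arguments the code actually passes, where n is the hex node):

def commit_hook_sequence(repo, n, xp1, xp2):
    # before anything is written; raising here aborts the commit
    repo.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
    # ... files, manifest and changelog entry written in a transaction ...
    # the node exists but the transaction is still open; can still abort
    repo.hook('pretxncommit', throw=True, node=n, parent1=xp1, parent2=xp2)
    # ... tr.close(), branch cache and dirstate updated ...
    # purely informational, fired once the commit is durable
    repo.hook("commit", node=n, parent1=xp1, parent2=xp2)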
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

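A minimal sketch of consuming walk(), assuming the hg.repository() entry point of this era and a current directory inside a working copy; the labels are only for display:

from mercurial import hg, ui

repo = hg.repository(ui.ui(), '.')
labels = {'f': 'found in tree', 'm': 'dirstate/manifest only', 'b': 'badmatch'}
for src, fn in repo.walk():
    print '%-22s %s' % (labels.get(src, src), fn)
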
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False, list_unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean,
                                                    list_unknown)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    mexec = lambda f: 'x' in ctx.fileflags(f)
                    mlink = lambda f: 'l' in ctx.fileflags(f)
                    is_exec = util.execfunc(self.root, mexec)
                    is_link = util.linkfunc(self.root, mlink)
                    def flags(f):
                        return is_link(f) and 'l' or is_exec(f) and 'x' or ''
                    for f in lookup:
                        if (f not in ctx or flags(f) != ctx.fileflags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

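A minimal sketch of unpacking the seven lists status() returns, under the same assumptions as above; the one-letter codes mirror hg status output:

from mercurial import hg, ui

repo = hg.repository(ui.ui(), '.')
(modified, added, removed, deleted,
 unknown, ignored, clean) = repo.status(list_ignored=True, list_clean=True)
for code, l in zip('MAR!?IC', (modified, added, removed,
                               deleted, unknown, ignored, clean)):
    for f in l:
        print code, f
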
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

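The negated-revision tuples above are a plain decorate-sort-undecorate; a standalone toy version:

revs = {'n1': 0, 'n2': 5, 'n3': 3}          # node -> revision number
decorated = [(-revs[h], h) for h in revs]   # decorate with -rev
decorated.sort()                            # tuples sort on the first element
print [n for (r, n) in decorated]           # ['n2', 'n3', 'n1'], newest first
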
    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

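A standalone sketch of the sweep described in the comment above, on a five-revision toy history; dicts stand in for parentrevs() and the per-revision branch lookup, and -1 plays the role of nullrev:

parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (2, 3)}
branch_of = {0: 'default', 1: 'default', 2: 'stable',
             3: 'default', 4: 'default'}

def toy_branchheads(branch, tip):
    heads = [tip]
    ancestors = set(parents[tip])
    for rev in xrange(tip - 1, -1, -1):
        if rev in ancestors:                 # known non-head: expand upwards
            ancestors.update(parents[rev])
            ancestors.remove(rev)
        elif branch_of[rev] == branch:       # in branch, not an ancestor: head
            heads.append(rev)
            ancestors.update(parents[rev])
    return heads

print toy_branchheads('default', 4)   # [4]; revs 3, 1, 0 are its ancestors
print toy_branchheads('stable', 2)    # [2]
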
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

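The i == f test with f doubling samples nodes at exponentially growing distances (1, 2, 4, 8, ...) from top; a standalone sketch on a linear chain where parent(n) is simply n - 1:

def toy_between(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        p = n - 1            # stand-in for changelog.parents(n)[0]
        if i == f:
            l.append(n)
            f *= 2
        n = p
        i += 1
    return l

print toy_between(20, 0)   # [19, 18, 16, 12, 4]
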
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

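A standalone sketch of the narrowing search above on a linear chain: the exponentially spaced samples that between() returns are scanned for the first locally known node, which either pins the earliest unknown node (f <= 2) or yields a smaller segment to search again. Integers stand in for nodes, parent(n) is n - 1, and "known" plays the role of the local nodemap:

known = set(range(0, 8))        # the local repo has revisions 0..7

def toy_between(top, bottom):   # same sampling as between() above
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)
            f *= 2
        n -= 1
        i += 1
    return l

def toy_narrow(head, base):
    search, fetch = [(head, base)], set()
    while search:
        top, bottom = search.pop(0)
        l = toy_between(top, bottom) + [bottom]
        p, f = top, 1
        for i in l:
            if i in known:
                if f <= 2:
                    fetch.add(p)          # p is the earliest unknown node
                else:
                    search.append((p, i)) # re-search the narrowed segment
                break
            p, f = i, f * 2
    return fetch

print toy_narrow(20, 5)   # set([8]): the first revision missing locally
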
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

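A standalone sketch of the pruning walk above on a toy graph: everything reachable from the common base is dropped, then the roots of what remains are reported. parents maps node -> (p1, p2), with None standing in for nullid:

parents = {'a': (None, None), 'b': ('a', None), 'c': ('b', None),
           'd': ('b', None), 'e': ('c', 'd')}

def toy_findoutgoing(base):
    remain = dict.fromkeys(parents)
    remove = list(base)
    while remove:                     # prune the base and all its ancestors
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in parents[n]:
                if p:
                    remove.append(p)
    subset = []                       # roots: nodes with no remaining parent
    for n in remain:
        p1, p2 = parents[n]
        if p1 not in remain and p2 not in remain:
            subset.append(n)
    return subset

print toy_findoutgoing(['b'])   # ['c', 'd'] in some order; 'e' is no root
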
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

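A minimal sketch of that capability dispatch, with two hypothetical peer classes standing in for real wire peers:

class OldSshPeer(object):
    def capable(self, name):
        return False                 # pre-unbundle servers advertise nothing

class HttpPeer(object):
    def capable(self, name):
        return name in ('unbundle', 'lookup')

def choose_push(remote):
    if remote.capable('unbundle'):
        return 'push_unbundle'       # the server serializes itself
    return 'push_addchangegroup'     # we must be able to lock the remote

print choose_push(OldSshPeer())   # push_addchangegroup
print choose_push(HttpPeer())     # push_unbundle
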
    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

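A standalone sketch of the head-count test above on toy data; desc stands in for changelog.heads(r, heads), i.e. the outgoing heads descended from a remote head r:

def creates_new_heads(local_heads, remote_heads, desc):
    newheads = list(local_heads)
    for r in remote_heads:
        if not desc.get(r):        # no outgoing head descends from r,
            newheads.append(r)     # so r stays a head after the push
    return len(newheads) > len(remote_heads)

# one local head 'x' descending from the only remote head 'r1': fast-forward
print creates_new_heads(['x'], ['r1'], {'r1': ['x']})        # False
# an unrelated second local head 'y' would create a new remote head
print creates_new_heads(['x', 'y'], ['r1'], {'r1': ['x']})   # True
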
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

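A standalone sketch of prune_parents on toy data: once a node is known to be present on the recipient side, its ancestors are marked present too and dropped from the missing set (the revision-order sort is omitted here):

parents = {'n1': [], 'n2': ['n1'], 'n3': ['n2'], 'n4': ['n3']}

def toy_prune(hasset, msngset):
    for node in list(hasset):
        worklist = list(parents[node])
        while worklist:                 # mark ancestors transitively
            n = worklist.pop()
            if n not in hasset:
                hasset[n] = 1
                worklist.extend(parents[n])
    for n in hasset:
        msngset.pop(n, None)

missing = {'n2': 'c2', 'n3': 'c3', 'n4': 'c4'}
toy_prune({'n3': 1}, missing)
print missing   # {'n4': 'c4'}: n3 and its ancestor n2 were pruned
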
1687 # This is a function generating function used to set up an environment
1705 # This is a function generating function used to set up an environment
1688 # for the inner function to execute in.
1706 # for the inner function to execute in.
1689 def manifest_and_file_collector(changedfileset):
1707 def manifest_and_file_collector(changedfileset):
1690 # This is an information gathering function that gathers
1708 # This is an information gathering function that gathers
1691 # information from each changeset node that goes out as part of
1709 # information from each changeset node that goes out as part of
1692 # the changegroup. The information gathered is a list of which
1710 # the changegroup. The information gathered is a list of which
1693 # manifest nodes are potentially required (the recipient may
1711 # manifest nodes are potentially required (the recipient may
1694 # already have them) and total list of all files which were
1712 # already have them) and total list of all files which were
1695 # changed in any changeset in the changegroup.
1713 # changed in any changeset in the changegroup.
1696 #
1714 #
1697 # We also remember the first changenode we saw any manifest
1715 # We also remember the first changenode we saw any manifest
1698 # referenced by so we can later determine which changenode 'owns'
1716 # referenced by so we can later determine which changenode 'owns'
1699 # the manifest.
1717 # the manifest.
1700 def collect_manifests_and_files(clnode):
1718 def collect_manifests_and_files(clnode):
1701 c = cl.read(clnode)
1719 c = cl.read(clnode)
1702 for f in c[3]:
1720 for f in c[3]:
1703 # This is to make sure we only have one instance of each
1721 # This is to make sure we only have one instance of each
1704 # filename string for each filename.
1722 # filename string for each filename.
1705 changedfileset.setdefault(f, f)
1723 changedfileset.setdefault(f, f)
1706 msng_mnfst_set.setdefault(c[0], clnode)
1724 msng_mnfst_set.setdefault(c[0], clnode)
1707 return collect_manifests_and_files
1725 return collect_manifests_and_files
1708
1726
1709 # Figure out which manifest nodes (of the ones we think might be part
1727 # Figure out which manifest nodes (of the ones we think might be part
1710 # of the changegroup) the recipient must know about and remove them
1728 # of the changegroup) the recipient must know about and remove them
1711 # from the changegroup.
1729 # from the changegroup.
1712 def prune_manifests():
1730 def prune_manifests():
1713 has_mnfst_set = {}
1731 has_mnfst_set = {}
1714 for n in msng_mnfst_set:
1732 for n in msng_mnfst_set:
1715 # If a 'missing' manifest thinks it belongs to a changenode
1733 # If a 'missing' manifest thinks it belongs to a changenode
1716 # the recipient is assumed to have, obviously the recipient
1734 # the recipient is assumed to have, obviously the recipient
1717 # must have that manifest.
1735 # must have that manifest.
1718 linknode = cl.node(mnfst.linkrev(n))
1736 linknode = cl.node(mnfst.linkrev(n))
1719 if linknode in has_cl_set:
1737 if linknode in has_cl_set:
1720 has_mnfst_set[n] = 1
1738 has_mnfst_set[n] = 1
1721 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1739 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1722
1740
1723 # Use the information collected in collect_manifests_and_files to say
1741 # Use the information collected in collect_manifests_and_files to say
1724 # which changenode any manifestnode belongs to.
1742 # which changenode any manifestnode belongs to.
1725 def lookup_manifest_link(mnfstnode):
1743 def lookup_manifest_link(mnfstnode):
1726 return msng_mnfst_set[mnfstnode]
1744 return msng_mnfst_set[mnfstnode]
1727
1745
1728 # A function generating function that sets up the initial environment
1746 # A function generating function that sets up the initial environment
1729 # the inner function.
1747 # the inner function.
1730 def filenode_collector(changedfiles):
1748 def filenode_collector(changedfiles):
1731 next_rev = [0]
1749 next_rev = [0]
1732 # This gathers information from each manifestnode included in the
1750 # This gathers information from each manifestnode included in the
1733 # changegroup about which filenodes the manifest node references
1751 # changegroup about which filenodes the manifest node references
1734 # so we can include those in the changegroup too.
1752 # so we can include those in the changegroup too.
1735 #
1753 #
1736 # It also remembers which changenode each filenode belongs to. It
1754 # It also remembers which changenode each filenode belongs to. It
1737 # does this by assuming the a filenode belongs to the changenode
1755 # does this by assuming the a filenode belongs to the changenode
1738 # the first manifest that references it belongs to.
1756 # the first manifest that references it belongs to.
1739 def collect_msng_filenodes(mnfstnode):
1757 def collect_msng_filenodes(mnfstnode):
1740 r = mnfst.rev(mnfstnode)
1758 r = mnfst.rev(mnfstnode)
1741 if r == next_rev[0]:
1759 if r == next_rev[0]:
1742 # If the last rev we looked at was the one just previous,
1760 # If the last rev we looked at was the one just previous,
1743 # we only need to see a diff.
1761 # we only need to see a diff.
1744 deltamf = mnfst.readdelta(mnfstnode)
1762 deltamf = mnfst.readdelta(mnfstnode)
1745 # For each line in the delta
1763 # For each line in the delta
1746 for f, fnode in deltamf.items():
1764 for f, fnode in deltamf.items():
1747 f = changedfiles.get(f, None)
1765 f = changedfiles.get(f, None)
1748 # And if the file is in the list of files we care
1766 # And if the file is in the list of files we care
1749 # about.
1767 # about.
1750 if f is not None:
1768 if f is not None:
1751 # Get the changenode this manifest belongs to
1769 # Get the changenode this manifest belongs to
1752 clnode = msng_mnfst_set[mnfstnode]
1770 clnode = msng_mnfst_set[mnfstnode]
1753 # Create the set of filenodes for the file if
1771 # Create the set of filenodes for the file if
1754 # there isn't one already.
1772 # there isn't one already.
1755 ndset = msng_filenode_set.setdefault(f, {})
1773 ndset = msng_filenode_set.setdefault(f, {})
1756 # And set the filenode's changelog node to the
1774 # And set the filenode's changelog node to the
1757 # manifest's if it hasn't been set already.
1775 # manifest's if it hasn't been set already.
1758 ndset.setdefault(fnode, clnode)
1776 ndset.setdefault(fnode, clnode)
1759 else:
1777 else:
1760 # Otherwise we need a full manifest.
1778 # Otherwise we need a full manifest.
1761 m = mnfst.read(mnfstnode)
1779 m = mnfst.read(mnfstnode)
1762 # For every file in we care about.
1780 # For every file in we care about.
1763 for f in changedfiles:
1781 for f in changedfiles:
1764 fnode = m.get(f, None)
1782 fnode = m.get(f, None)
1765 # If it's in the manifest
1783 # If it's in the manifest
1766 if fnode is not None:
1784 if fnode is not None:
1767 # See comments above.
1785 # See comments above.
1768 clnode = msng_mnfst_set[mnfstnode]
1786 clnode = msng_mnfst_set[mnfstnode]
1769 ndset = msng_filenode_set.setdefault(f, {})
1787 ndset = msng_filenode_set.setdefault(f, {})
1770 ndset.setdefault(fnode, clnode)
1788 ndset.setdefault(fnode, clnode)
1771 # Remember the revision we hope to see next.
1789 # Remember the revision we hope to see next.
1772 next_rev[0] = r + 1
1790 next_rev[0] = r + 1
1773 return collect_msng_filenodes
1791 return collect_msng_filenodes
1774
1792
1775 # We have a list of filenodes we think we need for a file, lets remove
1793 # We have a list of filenodes we think we need for a file, lets remove
1776 # all those we now the recipient must have.
1794 # all those we now the recipient must have.
1777 def prune_filenodes(f, filerevlog):
1795 def prune_filenodes(f, filerevlog):
1778 msngset = msng_filenode_set[f]
1796 msngset = msng_filenode_set[f]
1779 hasset = {}
1797 hasset = {}
1780 # If a 'missing' filenode thinks it belongs to a changenode we
1798 # If a 'missing' filenode thinks it belongs to a changenode we
1781 # assume the recipient must have, then the recipient must have
1799 # assume the recipient must have, then the recipient must have
1782 # that filenode.
1800 # that filenode.
1783 for n in msngset:
1801 for n in msngset:
1784 clnode = cl.node(filerevlog.linkrev(n))
1802 clnode = cl.node(filerevlog.linkrev(n))
1785 if clnode in has_cl_set:
1803 if clnode in has_cl_set:
1786 hasset[n] = 1
1804 hasset[n] = 1
1787 prune_parents(filerevlog, hasset, msngset)
1805 prune_parents(filerevlog, hasset, msngset)
1788
1806
1789 # A function generator function that sets up the a context for the
1807 # A function generator function that sets up the a context for the
1790 # inner function.
1808 # inner function.
1791 def lookup_filenode_link_func(fname):
1809 def lookup_filenode_link_func(fname):
1792 msngset = msng_filenode_set[fname]
1810 msngset = msng_filenode_set[fname]
1793 # Lookup the changenode the filenode belongs to.
1811 # Lookup the changenode the filenode belongs to.
1794 def lookup_filenode_link(fnode):
1812 def lookup_filenode_link(fnode):
1795 return msngset[fnode]
1813 return msngset[fnode]
1796 return lookup_filenode_link
1814 return lookup_filenode_link
1797
1815
        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed; dereference them and free the
            # memory.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

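    # A rough sketch of the stream layout produced by gengroup above (the
    # exact byte framing is defined in changegroup.py; this is only a
    # reading aid, not a normative spec):
    #
    #     <changelog group>              length-prefixed chunks,
    #                                    terminated by a zero-length chunk
    #     <manifest group>               same framing
    #     for each changed file:
    #         <chunkheader(len(fname))><fname>
    #         <file revlog group>
    #     <closechunk()>                 zero-length chunk ends the stream
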
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

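    # A small standalone sketch of the gennodelst idea above, assuming only
    # a list of (node, linkrev) pairs rather than a real revlog: keep the
    # nodes whose linked changelog revision is among those being sent.
    #
    #     revset = dict.fromkeys([2, 3])
    #     entries = [('n0', 1), ('n1', 2), ('n2', 3)]
    #     wanted = [n for n, linkrev in entries if linkrev in revset]
    #     assert wanted == ['n1', 'n2']
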
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for an empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)),
                          source=srctype, url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

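    # A minimal sketch (plain Python, no repository required) of how a
    # hypothetical caller might decode the return contract documented in
    # the docstring above:
    #
    #     def describe(ret):
    #         if ret == 0:
    #             return 'nothing changed'
    #         if ret > 1:
    #             return '%d new heads' % (ret - 1)
    #         if ret < 0:
    #             return '%d heads removed' % -(ret + 1)
    #         return 'head count unchanged'
    #
    #     assert describe(3) == '2 new heads'
    #     assert describe(1) == 'head count unchanged'
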
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

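    # The wire format parsed by stream_in above, reconstructed from the
    # reads it performs (a reading aid, not a normative spec):
    #
    #     <status>\n                     '0' ok, '1' operation forbidden,
    #                                    '2' remote lock failed
    #     <total_files> <total_bytes>\n
    #     repeated total_files times:
    #         <store path>\0<size>\n
    #         <size bytes of raw file data>
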
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if the revlog format changes, the client will have to check the
        # version and format flags on the "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

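# A minimal usage sketch (`local` and `remote` are hypothetical repository
# instances as created elsewhere in this module):
#
#     # falls back to pull() when specific heads are requested or the
#     # server lacks the 'stream' capability
#     local.clone(remote, stream=True)
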
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

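# A small sketch of the closure trick used by aftertrans (the file names
# here are illustrative, not taken from this module): the returned callable
# captures only plain tuples, not the repository, so no reference cycle is
# created and destructors can still run.
#
#     pending = [('journal', 'undo')]
#     callback = aftertrans(pending)   # nothing renamed yet
#     # ... later, once the transaction completes:
#     callback()                       # performs util.rename('journal', 'undo')
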
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True