
Commit 64478fd

Add ruff noqas as suitable

1 parent a3def3e

File tree

2 files changed: +4 -2 lines changed

bitsandbytes/functional.py

Lines changed: 1 addition & 1 deletion
@@ -230,7 +230,7 @@ def create_linear_map(signed=True, total_bits=8, add_zero=True):
     if gap == 0:
         return values
     else:
-        l = values.numel()//2
+        l = values.numel()//2 # noqa: E741
         return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist())
 
 def create_normal_map(offset=0.9677083, use_extra_value=True):
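
For context, E741 is the lint rule against ambiguous single-character names such as l, which is easy to confuse with 1 or I; this commit suppresses the rule rather than renaming. A minimal sketch of both options; the name "half" is a hypothetical alternative, not taken from the source:

    import torch

    values = torch.arange(10)

    # Option used in this commit: keep the short name, suppress the rule.
    l = values.numel() // 2  # noqa: E741

    # Hypothetical alternative: a descriptive name needs no suppression.
    half = values.numel() // 2
    assert l == half == 5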

bitsandbytes/research/autograd/_functions.py

Lines changed: 3 additions & 1 deletion
@@ -184,7 +184,9 @@ def backward(ctx, grad_output):
 
 class SwitchBackBnb(torch.autograd.Function):
     @staticmethod
-    def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()):
+    # TODO: the B008 on the line below is a likely bug; the current implementation will
+    # have each SwitchBackBnb instance share a single MatmulLtState instance!!!
+    def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()): # noqa: B008
         # default to pytorch behavior if inputs are empty
         ctx.is_empty = False
         if prod(A.shape) == 0:
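
The TODO flags the classic pitfall that B008 guards against: a Python default argument is evaluated once, at function definition time, so every call that omits state shares the single MatmulLtState built at import. A minimal, self-contained sketch of the pitfall and the usual None-sentinel fix; the State class below is a hypothetical stand-in for MatmulLtState, not the real class:

    class State:
        """Hypothetical stand-in for a mutable state object like MatmulLtState."""

    def forward_shared(state=State()):  # B008: default built once, at def time
        return state

    def forward_fresh(state=None):  # usual fix: sentinel default, build per call
        if state is None:
            state = State()
        return state

    assert forward_shared() is forward_shared()    # same object on every call
    assert forward_fresh() is not forward_fresh()  # a fresh object each call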
